# Advanced Lane Finding — goals/steps of this project: calibrate the camera,
# undistort images, apply color/gradient thresholds, perspective-transform to a
# birds-eye view, fit lane-line polynomials, measure curvature and vehicle
# offset, and annotate a video with the results.
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
from ipywidgets import interact, interactive, fixed
%matplotlib inline
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
# One (x, y, 0) entry per inner corner of the 9x6 chessboard; the board is
# assumed flat, so z stays 0.
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob('./camera_cal/calibration*.jpg')
# 5x4 grid of axes — assumes at most 20 calibration images.
fig, axes = plt.subplots(5,4, figsize=(30, 30))
axes = axes.ravel()
# Step through the list and search for chessboard corners
for i,fname in enumerate(images):
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (9,6),None)
    # If found, add object points, image points
    if ret == True:
        objpoints.append(objp)
        imgpoints.append(corners)
        # Draw and display the corners
        img = cv2.drawChessboardCorners(img, (9,6), corners, ret)
    # Show every image (corners overlaid when detection succeeded).
    axes[i].axis('off')
    axes[i].imshow(img)
# Demonstrate undistortion on one sample calibration image.
img = cv2.imread('./camera_cal/calibration10.jpg')
#img_size = (img.shape[1], img.shape[0])
#Camera calibration, given object points, image points, and the shape of the grayscale image:
# NOTE(review): `gray` is left over from the last loop iteration above — this
# works only if all calibration images share the same size; confirm.
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
undist = cv2.undistort(img, mtx, dist, None, mtx)
# Saving the camera calibration parameters
dist_pickle = {}
dist_pickle["mtx"] = mtx
dist_pickle["dist"] = dist
dist_pickle["rvecs"] = rvecs
dist_pickle["tvecs"] = tvecs
pickle.dump( dist_pickle, open( "CameraCalibration.p", "wb" ) )
# Helper used by the later pipeline sections.
def undistort(img):
    """Return *img* undistorted with the module-level calibration (mtx, dist)."""
    return cv2.undistort(img, mtx, dist, None, mtx)
# list of test images
images = glob.glob('test_images/test*.jpg')
# One row per test image: original on the left, undistorted on the right.
# Assumes exactly 6 test images.
f, axarr = plt.subplots(6, 2, figsize=(20, 40))
undist_images = []  # kept for the perspective-transform section below
#Undistorting a test image:
for i, fname in enumerate(images):
    img = mpimg.imread(fname,0)
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    undist_images.append(undist)
    axarr[i,0].axis('off')
    axarr[i,0].set_title('Original Image', fontsize=15)
    axarr[i,0].imshow(img)
    axarr[i,1].axis('off')
    axarr[i,1].set_title('Undistorted Image', fontsize=15)
    axarr[i,1].imshow(undist)
# Color-space threshold (name says HLS, conversion is actually HSV — see note).
def hls_color_thresh(img, threshLow, threshHigh):
    """Binary mask of pixels whose color falls inside [threshLow, threshHigh].

    NOTE(review): despite the function name, the image is converted with
    cv2.COLOR_RGB2HSV, not HLS.  The yellow/white bounds used by the pipeline
    (low saturation + high value for white) are tuned for HSV channel order,
    so the conversion is deliberately kept and only the misleading local
    variable name is corrected.

    Parameters:
        img: RGB image, shape (H, W, 3).
        threshLow, threshHigh: length-3 per-channel lower/upper bounds.

    Returns:
        float array of shape (H, W), 1.0 where all three channels are in range.
    """
    imgHSV = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    binary_output = np.zeros((img.shape[0], img.shape[1]))
    in_range = ((imgHSV[:, :, 0] >= threshLow[0]) & (imgHSV[:, :, 0] <= threshHigh[0])
                & (imgHSV[:, :, 1] >= threshLow[1]) & (imgHSV[:, :, 1] <= threshHigh[1])
                & (imgHSV[:, :, 2] >= threshLow[2]) & (imgHSV[:, :, 2] <= threshHigh[2]))
    binary_output[in_range] = 1
    return binary_output
#Sobel in x direction on the L and S channels of HLS
def sobel_x(img, sobel_kernel=3, min_thres=20, max_thres=100):
    """Binary mask where the x-gradient of the HLS L or S channel is in range.

    Parameters:
        img: RGB image, shape (H, W, 3).
        sobel_kernel: Sobel aperture size (odd).
        min_thres, max_thres: inclusive bounds on the 0-255 scaled gradient.

    Returns:
        uint8 array of shape (H, W), 1 where either channel's scaled
        |gradient| lies within [min_thres, max_thres].

    Bug fixed: the original scaled the *signed* Sobel response straight to
    uint8, so negative gradients wrapped around instead of being thresholded
    by strength — the absolute value is now taken first (the standard recipe).
    A zero-gradient image no longer divides by zero.
    """
    imghls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    # Gradient in x on the L (lightness, ch 1) and S (saturation, ch 2) channels.
    sobel_l = cv2.Sobel(imghls[:, :, 1], cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobel_s = cv2.Sobel(imghls[:, :, 2], cv2.CV_64F, 1, 0, ksize=sobel_kernel)

    def _in_range(sobel):
        # Scale |gradient| to 0-255 and test against the thresholds.
        abs_sobel = np.absolute(sobel)
        peak = np.max(abs_sobel)
        if peak == 0:  # flat channel: nothing passes
            return np.zeros(abs_sobel.shape, dtype=bool)
        scaled = np.uint8(255 * abs_sobel / peak)
        return (scaled >= min_thres) & (scaled <= max_thres)

    # A pixel passes when either channel's gradient is in range.
    binary_output = np.zeros(img.shape[:2], dtype=np.uint8)
    binary_output[_in_range(sobel_l) | _in_range(sobel_s)] = 1
    return binary_output
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
    """Binary mask where the grayscale gradient magnitude lies in *mag_thresh*.

    The magnitude sqrt(gx^2 + gy^2) is scaled to 0-255 before thresholding.
    Returns a uint8 array of shape (H, W) with 1s where the test passes.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Gradients along each axis.
    gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    magnitude = np.sqrt(gx ** 2 + gy ** 2)
    # Normalize to 8-bit so the thresholds are image-independent.
    scaled = np.uint8(255 * magnitude / np.max(magnitude))
    lo, hi = mag_thresh
    mask = np.zeros_like(scaled)
    mask[(scaled >= lo) & (scaled <= hi)] = 1
    return mask
#Direction threshold
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
    """Binary mask where the gradient direction lies within *thresh* (radians).

    Direction is arctan2(|gy|, |gx|), so it ranges over [0, pi/2].
    Returns a float array of shape (H, W) with 1s where the test passes.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    dx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    dy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Absolute values fold all directions into the first quadrant.
    direction = np.arctan2(np.absolute(dy), np.absolute(dx))
    lo, hi = thresh
    mask = np.zeros_like(direction)
    mask[(direction >= lo) & (direction <= hi)] = 1
    return mask
#Both Magnitude and direction threshold
def mag_dir_thresh(img, sobel_kernel=3, mag_thresh=(0, 255), dir_thresh=(0,np.pi/2)):
    """Binary mask combining gradient-magnitude and gradient-direction tests.

    Parameters:
        img: RGB image, shape (H, W, 3).
        sobel_kernel: Sobel aperture size (odd).
        mag_thresh: inclusive bounds on the 0-255 scaled magnitude.
        dir_thresh: inclusive bounds (radians) on arctan2(|gy|, |gx|).

    Returns:
        uint8 array of shape (H, W), 1 where both tests pass.

    Bug fixed: `gray` was computed but the Sobel operators ran on the 3-channel
    color image, so every intermediate — and the returned mask — was 3-D,
    unlike the other threshold helpers. Sobel now runs on the grayscale image.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Magnitude and (first-quadrant) direction of the gradient.
    gradmag = np.sqrt(sobelx**2 + sobely**2)
    absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
    # Scale magnitude to 0-255 so mag_thresh is image-independent.
    scaled_sobel = np.uint8(255 * gradmag / np.max(gradmag))
    binary_output = np.zeros_like(scaled_sobel)
    binary_output[(scaled_sobel >= mag_thresh[0]) & (scaled_sobel <= mag_thresh[1])
                  & (absgraddir >= dir_thresh[0]) & (absgraddir <= dir_thresh[1])] = 1
    return binary_output
#Examples of magnitude and direction thresholds
plt.figure(figsize=(10,8))
img = cv2.imread("test_images/test1.jpg")
# OpenCV loads BGR; the threshold helpers expect RGB.
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
magThr =mag_thresh(imgRGB, 3, (50, 100))
# NOTE(review): `np.pi/240/90` evaluates to ~0.000145 rad; it looks like a
# typo (perhaps `np.pi/2*40/90` was intended) — confirm before relying on it.
dirThr =dir_threshold(imgRGB, 9,(np.pi/240/90, np.pi/2*60/90))
#Sobel x only
imgThr = sobel_x(imgRGB,9,80,220) #Sobel x
print("Examples of magnitude and direction thresholds")
plt.figure(figsize=(40,20))
plt.subplot(5,1,1)
plt.title('Original Image')
fig =plt.imshow(imgRGB)
plt.subplot(5,1,2)
plt.title('Magnitude threshold Image')
fig =plt.imshow(magThr,cmap = 'gray')
plt.subplot(5,1,3)
plt.title('Direction threshold Image')
fig =plt.imshow(dirThr,cmap = 'gray')
plt.subplot(5,1,4)
plt.title('Sobel x only threshold Image')
fig =plt.imshow(imgThr,cmap = 'gray')
# on test images: visualize the three HLS channels separately.
img = cv2.imread("test_images/test1.jpg")
imgHLS = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
plt.figure(figsize=(10,8))
plt.subplot(3,1,1)
plt.title('Test image H')
fig =plt.imshow(imgHLS[:,:,0],cmap='gray')
plt.subplot(3,1,2)
plt.title('Test Image L')
fig =plt.imshow(imgHLS[:,:,1],cmap='gray')
plt.subplot(3,1,3)
plt.title('Test image S')
fig =plt.imshow(imgHLS[:,:,2],cmap='gray')
#Perspective transformation
# Hand-picked road trapezoid (src) mapped onto a rectangle (dst) to get a
# birds-eye view of a 1280x720 frame. M_persp/Minv_persp are reused by the
# video pipeline below.
src = np.float32([[585, 450], [204, 720], [1126, 720], [695, 450]])
dst = np.float32([[320, 0], [320, 720], [960,720], [960, 0]])
M_persp = cv2.getPerspectiveTransform(src, dst)     # road -> birds-eye
Minv_persp = cv2.getPerspectiveTransform(dst, src)  # birds-eye -> road
img_size = (imgThr.shape[1], imgThr.shape[0])
binary_warped = cv2.warpPerspective(imgThr, M_persp, img_size, flags=cv2.INTER_LINEAR)
plt.figure(figsize=(30,20))
plt.subplot(4,1,1)
plt.title('Binary image')
fig =plt.imshow(imgThr, cmap='gray')
plt.subplot(4,1,2)
plt.title('Binary perspective')
fig =plt.imshow(binary_warped, cmap='gray')
# Warp image
def warp(img, src, dst):
    """Perspective-warp *img* so the src quadrilateral maps onto dst.

    Returns (warped image, forward matrix M, inverse matrix Minv).
    """
    height, width = img.shape[:2]
    forward = cv2.getPerspectiveTransform(src, dst)
    inverse = cv2.getPerspectiveTransform(dst, src)
    top_down = cv2.warpPerspective(img, forward, (width, height), flags=cv2.INTER_LINEAR)
    return top_down, forward, inverse
h,w = undist_images[5].shape[:2]
# # define source and destination points for transform for test3.jpg
# NOTE: rebinds the module-level src/dst defined in the perspective cell above.
src = np.float32([(575,475),
                  (740,475),
                  (260,685),
                  (1075,685)])
dst = np.float32([(450,0),
                  (w-450,0),
                  (450,h),
                  (w-450,h)])
undist_images_warp, M, Minv = warp(undist_images[5], src, dst)
# Visualize warp
f, (axes1, axes2) = plt.subplots(1, 2, figsize=(20,10))
f.subplots_adjust(hspace = .2, wspace=.05)
axes1.imshow(undist_images[5])
# Outline the src quadrilateral on the undistorted image.
x = [src[0][0],src[2][0],src[3][0],src[1][0],src[0][0]]
y = [src[0][1],src[2][1],src[3][1],src[1][1],src[0][1]]
axes1.plot(x,y, color='#44ffff', alpha=0.5, linewidth=4, solid_capstyle='round', zorder=2)
axes1.imshow(undist_images[5])
axes1.set_ylim([h,0])
axes1.set_xlim([0,w])
axes1.set_title('Undistorted Image', fontsize=20)
axes2.imshow(undist_images_warp)
axes2.set_title('warped Image', fontsize=20)
def fitlines(binary_warped):
    """Sliding-window lane search on a warped binary image.

    A histogram of the bottom half seeds the left/right lane base x positions;
    `nwindows` windows are then stepped up the image, each re-centered on the
    mean x of the pixels it captures.  A 2nd-order polynomial x = f(y) is
    fitted to each lane's pixels.

    Returns:
        (left_fit, right_fit, out_img) — each fit is a 3-element np.polyfit
        coefficient array, or [] when no pixels were found for that lane;
        out_img is an RGB visualization with the search windows drawn and lane
        pixels colored (left red, right blue).

    Fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24 — replaced
    with the builtin int / floor division.
    """
    # Histogram of the bottom half: column sums peak at the lane bases.
    histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
    # Create an output image to draw on and visualize the result
    out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
    # Peaks of the left and right histogram halves start the two searches.
    midpoint = histogram.shape[0] // 2
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    nwindows = 9                                          # sliding windows per lane
    window_height = binary_warped.shape[0] // nwindows
    # x/y positions of all nonzero pixels in the image.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    leftx_current = leftx_base
    rightx_current = rightx_base
    margin = 100   # window half-width in pixels
    minpix = 50    # min pixels found before re-centering the next window
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows one by one, bottom to top.
    for window in range(nwindows):
        win_y_low = binary_warped.shape[0] - (window + 1) * window_height
        win_y_high = binary_warped.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image.
        cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)
        # Nonzero pixels inside each window.
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high)
                          & (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high)
                           & (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # Re-center the next window on the mean x when enough pixels were found.
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit x = f(y); an empty list signals "no fit" to the sanity check downstream.
    left_fit = np.polyfit(lefty, leftx, 2) if len(leftx) else []
    right_fit = np.polyfit(righty, rightx, 2) if len(rightx) else []
    # Color the lane pixels: left red, right blue.
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    return left_fit, right_fit, out_img
#Visualization of lines fitted
img = cv2.imread("test_images/test1.jpg")
#img = cv2.imread("test_images/straight_lines2.jpg")
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_undist = cv2.undistort(imgRGB, mtx, dist, None, mtx)
#2.Magnitude Threshold
#Threshold color
yellow_low = np.array([0,100,100])
yellow_high = np.array([50,255,255])
white_low = np.array([18,0,180])
white_high = np.array([255,80,255])
# `global` at module scope is a no-op; kept from the original notebook.
global ref_left
global ref_right
global left_fit
global right_fit
imgThres_yellow = hls_color_thresh(img_undist,yellow_low,yellow_high)
imgThres_white = hls_color_thresh(img_undist,white_low,white_high)
imgThr_sobelx = sobel_x(img_undist,9,80,220) #Sobel x only
# Union of the color and gradient masks.
img_mag_thr =np.zeros_like(imgThres_yellow)
img_mag_thr[(imgThres_yellow==1) | (imgThres_white==1) | (imgThr_sobelx==1)] =1
# NOTE(review): the next line is redundant — its mask is a subset of the one above.
img_mag_thr[(imgThres_yellow==1) | (imgThres_white==1)] =1
#3. Birds-eye
#Perspective array from before
img_size = (img_mag_thr.shape[1], img_mag_thr.shape[0])
binary_warped = cv2.warpPerspective(img_mag_thr, M_persp, img_size, flags=cv2.INTER_LINEAR)
left_fit, right_fit,out_img = fitlines(binary_warped)
print(out_img.shape)
print(np.max(out_img))
# Evaluate the fitted polynomials x = f(y) over the full image height.
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
plt.figure(figsize=(30,20))
plt.subplot(3,1,1)
plt.imshow(binary_warped, cmap='gray')
plt.subplot(3,1,2)
plt.imshow(out_img)
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0)
# NOTE(review): same subplot index as above — this re-selects/overwrites panel 2;
# (3,1,3) was likely intended.
plt.subplot(3,1,2)
# Stack the binary into 3 channels so it can be blended with the RGB overlay.
binary_warped2 = np.zeros((720, 1280,3))
binary_warped2[:,:,0] = binary_warped
binary_warped2[:,:,1] = binary_warped
binary_warped2[:,:,2] = binary_warped
plt.imshow(out_img)
# NOTE(review): binary_warped2 is float64 with values 0/1 while out_img is
# uint8 0-255 — cv2.addWeighted requires matching types; confirm this runs.
result = cv2.addWeighted(binary_warped2, .8, out_img, .8, 0)
plt.imshow(result)
def fit_continuous(left_fit, right_fit, binary_warped):
    """Refit the lane polynomials by searching near the previous fits.

    Pixels within +/-100 px (horizontally) of each previous curve x = f(y) are
    collected and refitted with a 2nd-order polynomial.  Returns
    (left_fit_updated, right_fit_updated); a lane with no pixels in its band
    yields [] in place of a coefficient array.
    """
    margin = 100  # horizontal search half-width around each previous fit
    nonzero_y, nonzero_x = (np.array(axis) for axis in binary_warped.nonzero())

    def _band_mask(fit):
        # True for nonzero pixels lying within +/-margin of the fitted curve.
        center = fit[0] * nonzero_y ** 2 + fit[1] * nonzero_y + fit[2]
        return (nonzero_x > center - margin) & (nonzero_x < center + margin)

    updated = []
    for fit in (left_fit, right_fit):
        mask = _band_mask(fit)
        xs, ys = nonzero_x[mask], nonzero_y[mask]
        updated.append(np.polyfit(ys, xs, 2) if len(xs) else [])
    return updated[0], updated[1]
def curvature(left_fit, right_fit, binary_warped):
    """Radius of curvature (meters) of each lane fit at the image bottom, plus
    the car's lateral offset (meters) from the lane center.

    Fix: the original plugged pixel-space polynomial coefficients into the
    curvature formula while scaling only y to meters, mixing units.  The
    pixel-space fit x = A*y^2 + B*y + C is first converted to meter space
    (A_m = A*xm/ym^2, B_m = B*xm/ym) and the radius
    R = (1 + (2*A_m*y_m + B_m)^2)^1.5 / |2*A_m| is evaluated at the bottom row.

    NOTE(review): the center-offset computation hard-codes y=720 and image
    center x=640, i.e. assumes a 1280x720 warped frame — confirm.
    """
    ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
    y_eval = np.max(ploty)  # bottom of the image (closest to the car)
    # from estimate of US lane regulations (approx)
    ym_per_pix = 30 / 720   # meters per pixel in y dimension
    xm_per_pix = 3.7 / 700  # meters per pixel in x dimension

    def _radius_m(fit):
        # Convert pixel-space coefficients to meter space, then apply the
        # standard curvature formula at the bottom of the image.
        a_m = fit[0] * xm_per_pix / (ym_per_pix ** 2)
        b_m = fit[1] * xm_per_pix / ym_per_pix
        y_m = y_eval * ym_per_pix
        return ((1 + (2 * a_m * y_m + b_m) ** 2) ** 1.5) / np.absolute(2 * a_m)

    left_curverad = _radius_m(left_fit)
    right_curverad = _radius_m(right_fit)
    # Offset of the image center (x=640) from the midpoint of the two fits at
    # the bottom row (y=720), converted to meters.
    lane_mid = ((left_fit[0] * 720 ** 2 + left_fit[1] * 720 + left_fit[2])
                + (right_fit[0] * 720 ** 2 + right_fit[1] * 720 + right_fit[2])) / 2
    center = (lane_mid - 640) * xm_per_pix
    # Now our radius of curvature is in meters
    print(left_curverad, 'm', right_curverad, 'm')
    return left_curverad, right_curverad, center
def drawLine(undist, warped, left_fit, right_fit):
    """Paint the lane polygon between the two fits, unwarp it with the
    module-level Minv_persp, and blend it onto the undistorted frame.

    Returns (annotated frame, lane polygon image in warped space).
    """
    # Blank 3-channel canvas in warped (birds-eye) space.
    zero_plane = np.zeros_like(warped).astype(np.uint8)
    canvas = np.dstack((zero_plane, zero_plane, zero_plane))
    ys = np.linspace(0, warped.shape[0] - 1, warped.shape[0])
    # Evaluate both polynomials x = f(y) over the full height.
    left_xs = np.polyval(left_fit, ys)
    right_xs = np.polyval(right_fit, ys)
    # Left edge top-to-bottom, right edge bottom-to-top -> closed polygon.
    pts_left = np.array([np.transpose(np.vstack([left_xs, ys]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_xs, ys])))])
    polygon = np.hstack((pts_left, pts_right))
    cv2.fillPoly(canvas, np.int_([polygon]), (0, 255, 0))
    # Back to road perspective, then blend over the undistorted frame.
    unwarped = cv2.warpPerspective(canvas, Minv_persp, (canvas.shape[1], canvas.shape[0]))
    blended = cv2.addWeighted(undist, 1, unwarped, 0.3, 0)
    return (blended, canvas)
# Per-video pipeline state shared with process_image.
# (`global` at module scope is a no-op; kept from the original notebook.)
global counter
# Frames accepted since the last full sliding-window search.
counter=0
# Seed fits [A, B, C] of x = A*y^2 + B*y + C, plus last-known-good fallbacks.
ref_left =np.array([-0.0001,0,400])
ref_right=np.array([-0.0001,0,1000])
left_fit =np.array([-0.0001,0,400])
right_fit=np.array([-0.0001,0,1000])
def process_image(image):
    """Full lane-finding pipeline for a single RGB video frame.

    Uses module-level state: mtx/dist (camera calibration), M_persp
    (perspective matrix), and counter / ref_left / ref_right / left_fit /
    right_fit (per-video fit history).  Returns the frame with the detected
    lane drawn and curvature/offset text overlaid.
    """
    #1. Camera correction
    #Calibration arrays pre-calculated
    img_undist = cv2.undistort(image, mtx, dist, None, mtx)
    global counter
    #2.Magnitude Threshold
    #Threshold color
    # Per-channel bounds for yellow and white lane paint.
    yellow_low = np.array([0,100,100])
    yellow_high = np.array([50,255,255])
    white_low = np.array([18,0,180])
    white_high = np.array([255,80,255])
    global ref_left
    global ref_right
    global left_fit
    global right_fit
    imgThres_yellow = hls_color_thresh(img_undist,yellow_low,yellow_high)
    imgThres_white = hls_color_thresh(img_undist,white_low,white_high)
    imgThr_sobelx = sobel_x(img_undist,9,80,220) #Sobel x
    # Union of the color masks and the gradient mask.
    img_mag_thr =np.zeros_like(imgThres_yellow)
    #imgThresColor[(imgThres_yellow==1) | (imgThres_white==1)] =1
    img_mag_thr[(imgThres_yellow==1) | (imgThres_white==1) | (imgThr_sobelx==1)] =1
    #3. Birds-eye
    #Perspective array pre-calculated
    img_size = (img_mag_thr.shape[1], img_mag_thr.shape[0])
    binary_warped = cv2.warpPerspective(img_mag_thr, M_persp, img_size, flags=cv2.INTER_LINEAR)
    #4. Detect lanes and return fit curves
    # Full sliding-window search on the first frame; afterwards search only
    # around the previous fits.
    if counter==0:
        left_fit, right_fit,out_imgfit = fitlines(binary_warped)
    else:
        left_fit, right_fit = fit_continuous(left_fit, right_fit, binary_warped)
    status_sanity, d0, d1 =sanity_check(left_fit, right_fit, 0, .55)
    #Calc curvature and center
    if status_sanity == True:
        #Save as last reliable fit
        ref_left, ref_right = left_fit, right_fit
        counter+=1
    else: #Use the last reliable fit
        left_fit, right_fit = ref_left, ref_right
    left_curv, right_curv, center_off = curvature(left_fit, right_fit, binary_warped)
    #Warp back to original and merge with image
    img_out, img_birds = drawLine(img_undist, binary_warped,left_fit, right_fit)
    #Write curvature and center in image
    TextL = "Left curv: " + str(int(left_curv)) + " m"
    TextR = "Right curv: " + str(int(right_curv))+ " m"
    TextC = "Center offset: " + str(round( center_off,2)) + "m"
    fontScale=1
    thickness=2
    fontFace = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(img_out, TextL, (130,40), fontFace, fontScale,(200,255,155), thickness, lineType = cv2.LINE_AA)
    cv2.putText(img_out, TextR, (130,70), fontFace, fontScale,(200,255,155), thickness, lineType = cv2.LINE_AA)
    cv2.putText(img_out, TextC, (130,100), fontFace, fontScale,(200,255,155), thickness, lineType = cv2.LINE_AA)
    return img_out
def sanity_check(left_fit, right_fit, minSlope, maxSlope):
    """Check that both lane fits exist and that their slopes roughly agree.

    Compares dx/dy of the two polynomials at y=460 and y=720; both absolute
    differences must fall inside [minSlope, maxSlope].

    Returns (status, d0, d1) where d0/d1 are the slope differences at the two
    sample rows (0 when a fit is missing).
    """
    # fitlines / fit_continuous return [] when a lane had no pixels.
    if len(left_fit) == 0 or len(right_fit) == 0:
        return (False, 0, 0)
    # Slope of x = A*y^2 + B*y + C is 2*A*y + B.
    d0 = np.abs((2 * left_fit[0] * 460 + left_fit[1])
                - (2 * right_fit[0] * 460 + right_fit[1]))
    d1 = np.abs((2 * left_fit[0] * 720 + left_fit[1])
                - (2 * right_fit[0] * 720 + right_fit[1]))
    status = (minSlope <= d0 <= maxSlope) and (minSlope <= d1 <= maxSlope)
    return (status, d0, d1)
# Run the full pipeline on one test image.
img = cv2.imread("test_images/test1.jpg")
#img = cv2.imread("test_images/straight_lines1.jpg")
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img2= process_image(imgRGB)
plt.figure(figsize=(10,15))
#plt.figure(figsize=(5,10))
# NOTE(review): two figures are opened; only the last one receives the imshow.
plt.figure(figsize=(30,20))
plt.imshow(img2)
from moviepy.editor import VideoFileClip
from IPython.display import HTML
import moviepy as mve
#Create video file pipeline
# Reset so the first video frame triggers a full sliding-window search.
counter=0
output = 'out_test_video.mp4'
clip1 = VideoFileClip("project_video.mp4")
# fl_image applies process_image to every frame of the clip.
out_clip = clip1.fl_image(process_image)
# %time is an IPython magic — valid only inside a notebook cell.
%time out_clip.write_videofile(output, audio=False)
print(counter)
# Inline HTML player for the rendered video (notebook display).
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(output))